1 Problem A - Euler-Maruyama Method

First we implement a discretization for the Black-Scholes-Merton price dynamics with the given parameter set, with \(n = 250\) steps. We generate \(M_1 = 10\), \(M_2 = 100\), \(M_3 = 1000\), \(M_4 = 10000\) and \(M_5 = 100000\) paths and plot the paths in separate figures.

# Reproducible RNG seed and the Black-Scholes-Merton parameter set for Problem A.
set.seed(061999)
s0 <- 12      # initial price S(0)
T1 <- 1       # time horizon T
mu <- 0.03    # drift
sigma <- 0.17 # volatility
n <-  250     # number of time steps
t <- seq(0, T1, length.out = n+1) # n steps means n+1 points in this vector. 
h <- t[2] - t[1] # uniform step size; h = T1/n gives the same result. 
# Number of simulated paths for each Monte Carlo experiment.
M1 <- 10
M2 <- 100
M3 <- 1000
M4 <- 10000
M5 <- 100000

TODO(review): I do not fully understand why this is done — think more about why cumsum is used. The first attempt was wrong, because it depended on the discretization.

# Simulate one GBM price path on the (uniform) time grid `t` using the exact
# solution S(t) = s0 * exp((mu - sigma^2/2) * t + sigma * W(t)).
#
# s0    : initial price S(0); returned at t[1] (grid is assumed to start at 0)
# t     : uniform time grid of length n + 1
# mu    : drift
# sigma : volatility
#
# Returns a numeric vector of prices, one per grid point.
#
# Fix: the Wiener process must start at W(0) = 0. The original drew
# length(t) normals and cumsum'ed all of them, so W at the first grid point
# was a nonzero draw and the path did not start at s0.
price.path <- function(s0, t, mu, sigma){
  h <- t[2] - t[1] # uniform step size
  # W(t_k) is the sum of k iid N(0, h) increments; scaling N(0,1) draws by
  # sqrt(h) gives increments with the correct variance.
  dW <- rnorm(length(t) - 1)
  W <- c(0, cumsum(dW) * sqrt(h))
  s0 * exp((mu - sigma^2/2) * t + sigma * W)
}
# Generate the price paths for all the given numbers of paths.
# Helper: simulate `m` independent GBM paths on grid `t`, one path per row.
# Replaces five copy-pasted generation loops with a single function.
simulate.paths <- function(m, s0, t, mu, sigma){
  paths <- matrix(NA_real_, nrow = m, ncol = length(t))
  for (i in seq_len(m)){
    paths[i, ] <- price.path(s0 = s0, t = t, mu = mu, sigma = sigma)
  }
  paths
}

M1.paths <- simulate.paths(M1, s0, t, mu, sigma)
M2.paths <- simulate.paths(M2, s0, t, mu, sigma)
M3.paths <- simulate.paths(M3, s0, t, mu, sigma)
M4.paths <- simulate.paths(M4, s0, t, mu, sigma)
M5.paths <- simulate.paths(M5, s0, t, mu, sigma)
#plot(NULL, NULL, main = paste0(M1," paths"), xlim = c(0,1), ylim = c(0, 50), ylab = "S", xlab = "t")

#lines(M1.paths, col = 1:M1)
# df1 <- data.frame(M1.paths)
# df2 <- melt(df1)
# df2$rowid <- 1:M1
# df2 %>% ggplot(aes(variable, value, group=factor(rowid))) + 
#   geom_line(aes(color=factor(rowid)))

# plot(df1[1,])

# Does not look any better when the plotting is done with ggplot.
#plot(t, M1.paths[1,], type = "l")

# TODO: improve this plotting with ggplot later.
# TODO: put time on the x-axis instead of the discretization index.
# Plot the simulated paths for M1, M2 and M3 together with the drift curve
# s0 * exp(mu * t) (the pointwise expected value of the process).
matplot(t(M1.paths), type = "l", main = paste0(M1, " paths"),
        xlab = "Discretization Points", ylab = "S")
lines(s0 * exp(mu * t)) # drift / "mean value" curve

matplot(t(M2.paths), type = "l", main = paste0(M2, " paths"),
        xlab = "Discretization Points", ylab = "S")
lines(colMeans(M2.paths), cex = 2) # pointwise sample mean across paths
lines(s0 * exp(mu * t))

matplot(t(M3.paths), type = "l", main = paste0(M3, " paths"),
        xlab = "Discretization Points", ylab = "S")

Testing code below, comparing to slide 33. It looks better now, but I do not fully understand why — to be investigated.

# Sanity check against the example on slide 33 (Section 2.2).
# NOTE(review): this intentionally overwrites the globals n2, M, t and s0.
n2 <- 200 # number of grid points (steps)
M <- 10   # number of paths
Mtest.paths <- matrix(NA_real_, nrow = M, ncol = n2 + 1)
t <- seq(0, 5, length.out = n2 + 1)
s0 <- 10
for (i in seq_len(M)){
  Mtest.paths[i, ] <- price.path(s0 = s0, t = t, mu = 0.05, sigma = 0.05)
}

m <- colMeans(Mtest.paths) # pointwise sample mean across the test paths

# TODO: redo this plot with ggplot and put time on the x-axis.
matplot(t(Mtest.paths), type = "l", main = paste0(M, " paths"),
        xlab = "Discretization Points", ylab = "S", ylim = c(0, 20))
lines(m)
lines(s0 * exp(0.05 * t)) # drift curve for comparison with the slide

This looks much better!

Testing: this was tuned to resemble the plot on slide 33 in Section 2.2, and it looks better now.

The Monte Carlo estimator for \(\hat{S}_T\) is calculated separately for each of the values of \(M_i\). The 95% confidence interval (5% significance level) is provided for each estimator. A comparison between these estimators and the analytical solution of \(\mathbb{E}(S_T)\) is done and differences are explained.

# Monte Carlo estimator of S_T: the sample mean of the terminal prices
# (column n + 1) across all simulated paths (one path per row of M).
hat_ST <- function(M, n){
  terminal.prices <- M[, n + 1]
  mean(terminal.prices)
}

# 95% confidence interval (normal approximation, 5% significance level) for
# the Monte Carlo estimate of S_T.
#
# M : matrix of simulated paths, one path per row
# n : number of time steps, so terminal prices sit in column n + 1
#
# Returns list(l = lower bound, u = upper bound).
CI_ST <- function(M, n){
  terminal <- M[, n + 1]
  s <- sd(terminal)   # sample standard deviation of the terminal prices
  m <- mean(terminal) # point estimate (equals hat_ST(M, n))
  # Half-width of the CI; nrow(M) is the number of paths (was dim(M)[[1]]).
  half <- qnorm(0.975) * s / sqrt(nrow(M))
  list(l = m - half, u = m + half)
}
# Point estimate and 95% CI of S_T for each path count M_i, assembled into a
# 3 x 5 table (rows: estimate, lower, upper; columns: M_i).
M1.hat <- hat_ST(M1.paths, n)
CI1 <- CI_ST(M1.paths, n)
M2.hat <- hat_ST(M2.paths, n)
CI2 <- CI_ST(M2.paths, n)
M3.hat <- hat_ST(M3.paths, n)
CI3 <- CI_ST(M3.paths, n)
M4.hat <- hat_ST(M4.paths, n)
CI4 <- CI_ST(M4.paths, n)
M5.hat <- hat_ST(M5.paths, n)
CI5 <- CI_ST(M5.paths, n)

col1 <- rbind(M1.hat, CI1$l, CI1$u)
col2 <- rbind(M2.hat, CI2$l, CI2$u)
col3 <- rbind(M3.hat, CI3$l, CI3$u)
col4 <- rbind(M4.hat, CI4$l, CI4$u)
col5 <- rbind(M5.hat, CI5$l, CI5$u)

hats <- do.call(cbind, list(col1, col2, col3, col4, col5))
dimnames(hats) <- list(
  c("Est.", "Lower CI", "Upper CI"),
  c("M1 = 10", "M2 = 100", "M3 = 1000", "M4 = 10000", "M5 = 100000")
)
knitr::kable(hats, caption = "Monte Carlo Estimation for S_T, varying M")
Monte Carlo Estimation for S_T, varying M
M1 = 10 M2 = 100 M3 = 1000 M4 = 10000 M5 = 100000
Est. 12.79035 12.27904 12.41963 12.32897 12.36536
Lower CI 11.86917 11.90612 12.28574 12.28727 12.35226
Upper CI 13.71154 12.65195 12.55352 12.37067 12.37845

Now, what is the analytical solution for \(\mathbb{E}(S_T)\)? The analytical solution is

# Analytical solution: under GBM, S_T = s0*exp((mu - sigma^2/2)*T + sigma*W_T),
# hence E(S_T) = s0*exp(mu*T). (The previous expression s0*exp(mu + sigma^2/2)
# was incorrect.)
# NOTE(review): at this point `s0` has been overwritten to 10 by the test code
# above; with the problem's s0 = 12 this gives 12*exp(0.03) = 12.365, which
# matches the Monte Carlo estimates in the table.
s0*exp(mu*T1)

Explain the differences between the Monte Carlo estimates and the analytical value, and why the confidence intervals differ in width.

Now we fix the number of paths \(M^* = 1000\) and vary the values of n, i.e. the number of steps, while repeating the discussion done above.

M.star <- 1000
n1 <- 12
n2 <- 24
n3 <- 250
n4 <- 1000
# Find new paths.
# Helper: simulate M paths on a uniform grid with `n` steps over [0, T1].
# Fixes two issues in the original: the grid was rebuilt inside every loop
# iteration (loop-invariant work), and the same loop was copy-pasted 4 times.
paths.for.n <- function(M, n){
  tg <- seq(0, T1, length.out = n + 1) # n steps means n + 1 grid points
  paths <- matrix(NA_real_, nrow = M, ncol = n + 1)
  for (i in seq_len(M)){
    paths[i, ] <- price.path(s0, tg, mu, sigma)
  }
  paths
}
n1.paths <- paths.for.n(M.star, n1)
n2.paths <- paths.for.n(M.star, n2)
n3.paths <- paths.for.n(M.star, n3)
n4.paths <- paths.for.n(M.star, n4)
# Quick-look plots of the paths for each discretization level.
# TODO: the transpose could be avoided by building the matrices the other way.
for (paths in list(n1.paths, n2.paths, n3.paths, n4.paths)) {
  matplot(t(paths), type = "l")
}
# Point estimate and 95% CI of S_T for each step count n_i (M* = 1000 paths).
n1.hat <- hat_ST(n1.paths, n1)
CI1 <- CI_ST(n1.paths, n1)
n2.hat <- hat_ST(n2.paths, n2)
CI2 <- CI_ST(n2.paths, n2)
n3.hat <- hat_ST(n3.paths, n3)
CI3 <- CI_ST(n3.paths, n3)
n4.hat <- hat_ST(n4.paths, n4)
CI4 <- CI_ST(n4.paths, n4)

col1 <- rbind(n1.hat, CI1$l, CI1$u)
col2 <- rbind(n2.hat, CI2$l, CI2$u)
col3 <- rbind(n3.hat, CI3$l, CI3$u)
col4 <- rbind(n4.hat, CI4$l, CI4$u)

hats <- do.call(cbind, list(col1, col2, col3, col4))
dimnames(hats) <- list(
  c("Est.", "Lower CI", "Upper CI"),
  c("n1 = 12", "n2 = 24", "n3 = 250", "n4 = 1000")
)
knitr::kable(hats, caption = "Monte Carlo Estimation for S_T, varying n")
Monte Carlo Estimation for S_T, varying n
n1 = 12 n2 = 24 n3 = 250 n4 = 1000
Est. 10.27054 10.26448 10.34359 10.34262
Lower CI 10.15189 10.15526 10.23580 10.23184
Upper CI 10.38919 10.37369 10.45137 10.45339

The results again differ slightly across the discretization levels. Discuss why: since the paths are generated from the exact GBM solution, the distribution of the terminal value does not depend on \(n\), so the remaining differences are pure Monte Carlo sampling noise.

2 Problem B - Option Pricing I

# Parameter set for Problem B: European call, (s0, T, r, sigma, K).
set.seed(061999)
s0 <- 24      # initial price
T1 <- 1.5     # maturity
r <- 0.02     # risk-free rate
sigma <- 0.22 # volatility
K <- 23.5     # strike
# Closed-form Black-Scholes-Merton price of a European call at t = 0.
# s0: spot, T1: maturity, r: risk-free rate, sigma: volatility, K: strike.
BSM <- function(s0, T1, r, sigma, K){
  vol.T <- sigma * sqrt(T1) # total volatility over the option's lifetime
  d1 <- (log(s0 / K) + (r + sigma^2 / 2) * T1) / vol.T
  d2 <- d1 - vol.T
  pnorm(d1) * s0 - pnorm(d2) * K * exp(-r * T1)
}

We calculate the price of the given Call option (parameters are given in code block above) with the Black-Scholes-Merton formula.

(BSM.price <- BSM(s0, T1, r, sigma, K))
#> [1] 3.149899

As we can see, the price is approximately equal to 3.15, when rounded to 2 decimals.

We implement a Monte Carlo estimator for this option price by simulating paths with the Euler-Maruyama method for given steps and paths.

Notice that for the path-independent options, like standard European call and put options, it is not needed to save the price path as is done below. This is done to keep the function as general as possible, in order to re-use it for the rest of the assignment. THIS COULD EASILY BE CHANGED IN THIS CASE IF TOO SLOW! (simply overwriting the previous value in the loop in every iteration).

# Monte Carlo option-price estimator using Euler-Maruyama paths (slide 28).
#
# M              : number of simulated paths
# n              : number of time steps per path
# r              : risk-free rate used for discounting
# s0, mu, sigma  : GBM parameters of the price dynamics
# h              : step size (T1 / n)
# T1             : time to maturity
# payoff.profile : function(S, K) giving the payoff from the path summary S
# set.S          : function(S.values) reducing a full path to the value S
#                  used in the payoff (keeps the estimator general)
# K              : strike. Fix: K is now a formal parameter. Previously the
#                  callers' K = ... was swallowed by `...` and the payoff
#                  silently used the *global* K via lexical scoping.
#
# Returns the discounted average payoff (the MC estimator of the price).
MC <- function(M, n, r, s0, mu, h, sigma, T1, payoff.profile, set.S, K, ...){
  C.values <- rep(NA_real_, length.out = M) # payoff per path
  for (i in seq_len(M)){
    Z <- rnorm(n) # exactly n increments are needed (was n + 1; one unused)
    S.values <- rep(NA_real_, length.out = n+1)
    S.values[1] <- s0 # price at t = 0
    for (j in 2:(n+1)){ # Euler-Maruyama recursion for the price path
      S.values[j] <- S.values[j-1] + mu*S.values[j-1]*h + sigma*S.values[j-1]*sqrt(h)*Z[j-1]
    }
    S <- set.S(S.values) # reduce path to the quantity the payoff depends on
    C.values[i] <- payoff.profile(S, K)
  }
  exp(-r*T1)*mean(C.values) # discounted average payoff = MC estimator
}
# Payoff of a European call for a scalar price S: the intrinsic value (S - K)^+.
Call.profile <- function(S, K){
  intrinsic <- S - K
  max(0, intrinsic)
}

# Path summary for the (path-independent) call: the terminal price S_T,
# i.e. the last value of the simulated path.
Call.set.S <- function(S.values){
  tail(S.values, 1)
}

Assuming that \(\mu = r\).

M.list <- c(10, 100, 1000, 10000, 100000)
n.list <- c(10, 100, 1000)
# MC price for every (M, n) combination; rows index M, columns index n.
combinations <- matrix(NA_real_, nrow = length(M.list), ncol = length(n.list))

for (i in seq_along(M.list)){
  for (j in seq_along(n.list)){
    h <- T1/n.list[j] # step size for this discretization level
    combinations[i, j] <- MC(M = M.list[i], n = n.list[j], r = r, s0 = s0, mu = r, h = h, 
                             sigma = sigma, T1 = T1, payoff.profile = Call.profile, set.S = Call.set.S, K = K)
  }
}
# Signed relative error and absolute error against the closed-form price.
rel.errors <- (BSM.price - combinations)/BSM.price
abs.errors <- abs(BSM.price - combinations)
# Tabulate the results; shared labels avoid repeating the literals.
M.labels <- c("M = 10", "M = 100", "M = 1000", "M = 10000", "M = 100000")
n.labels <- c("n = 10", "n = 100", "n = 1000")
df.rel <- data.frame(rel.errors)
rownames(df.rel) <- M.labels
colnames(df.rel) <- n.labels

df.abs <- data.frame(abs.errors)
rownames(df.abs) <- M.labels
colnames(df.abs) <- n.labels

# Render the relative-error table (Markdown via knitr).
knitr::kable(df.rel, caption = "Relative Errors")
Relative Errors
n = 10 n = 100 n = 1000
M = 10 -0.5821167 0.5566707 0.6125545
M = 100 0.0255317 0.0021910 -0.3364722
M = 1000 0.0522963 0.0273627 0.0263294
M = 10000 -0.0066211 0.0508549 0.0291815
M = 100000 -0.0035652 -0.0019417 0.0029947
# Render the absolute-error table (Markdown via knitr).
knitr::kable(df.abs, caption = "Absolute Errors")
Absolute Errors
n = 10 n = 100 n = 1000
M = 10 1.8336091 1.7534568 1.9294852
M = 100 0.0804222 0.0069013 1.0598536
M = 1000 0.1647280 0.0861897 0.0829349
M = 10000 0.0208557 0.1601878 0.0919187
M = 100000 0.0112299 0.0061162 0.0094331

Interpretation of results in view of \(n\) and \(M\).

Testing against the worked examples in the slides below.

# Worked example from slide 21, Section 3: one Euler-Maruyama path with the
# slide's fixed normal draws, to verify the recursion reproduces the slide.
# NOTE(review): these assignments overwrite the global parameters set above.
mu <- 0.05
sigma <- 0.2
s0 <- 10
T1 <- 1
n <- 5
h <- 1/n

# NOTE(review): this rnorm draw is immediately overwritten by the slide's
# hard-coded values on the next line; its only effect is advancing the RNG
# stream — confirm whether that is intentional.
Z <- rnorm(n)
Z <- c(0.5377, 1.8339, -2.2588, 0.8622, 0.3188)
S.values <- rep(NA, length.out = n+1)
S.values[1] <- s0
# Euler-Maruyama recursion: S_j = S_{j-1} * (1 + mu*h + sigma*sqrt(h)*Z_{j-1}).
for (j in 2:(n+1)){
  S.values[j] <- S.values[j-1] + mu*S.values[j-1]*h + sigma*S.values[j-1]*sqrt(h)*Z[j-1]
}
S.values                   # full simulated path (printed for comparison)
S.values[length(S.values)] # terminal value S_T (printed for comparison)
# Correct!
# This means that the inner loop with the Euler Method is correctly implemented at least!
# Redefinition of the MC estimator (this later definition shadows the first
# and is the one in effect for the calls in Problems B and C below).
# Same contract as above: discounted average payoff over M Euler-Maruyama
# paths of n steps, with `set.S` reducing a path to the payoff's argument.
# Fix: K is now a formal parameter. Previously the callers' K = ... was
# swallowed by `...` and payoff.profile(S, K) silently used the *global* K.
MC <- function(M, n, r, s0, mu, h, sigma, T1, payoff.profile, set.S, K, ...){
  C.values <- rep(NA_real_, length.out = M) # payoff per path
  for (i in seq_len(M)){
    Z <- rnorm(n) # n standard normal increments
    S.values <- rep(NA_real_, length.out = n+1)
    S.values[1] <- s0 # price at t = 0
    for (j in 2:(n+1)){ # Euler-Maruyama recursion
      S.values[j] <- S.values[j-1] + mu*S.values[j-1]*h + sigma*S.values[j-1]*sqrt(h)*Z[j-1]
    }
    S <- set.S(S.values) # generality is kept using the helper set.S
    C.values[i] <- payoff.profile(S, K)
  }
  exp(-r*T1)*mean(C.values) # discounted average payoff = MC estimator
}

3 Problem C - Option Pricing II

A Monte Carlo estimator is implemented for a specific Asian Call Option. This Asian Call Option averages prices every Friday. We set \(n = 252\) and assume that \(n = 1\) is Monday. This means that we average the prices \(t_5, t_{10}, t_{15}, \ldots\). The payoff profile for this option is thus \((\bar{S}-K)^+\), where \(\bar{S}\) is the arithmetic average over all the prices \(t_i, \hspace{0.2em} i \in \{5, 10, 15, \ldots\}\). We set \(M = 10000\) and use the parameter set \((s_0, T, r, \sigma, K) = (20, 1, 0.02, 0.24, 20)\).

# Parameter set for the Asian call: (s0, T, r, sigma, K) = (20, 1, 0.02, 0.24, 20).
set.seed(061999)
s0 <- 20      # initial price
T1 <- 1       # maturity
r <- 0.02     # risk-free rate
sigma <- 0.24 # volatility
K <- 20       # strike
n <- 252      # time steps (trading days)

# Path summary for the Asian option: the arithmetic mean of every 5th price
# (the Friday prices t_5, t_10, ...), fed into the MC estimator via set.S.
Asian.set.S <- function(S.values){
  fridays <- seq(from = 5, to = length(S.values), by = 5)
  mean(S.values[fridays])
}

# Payoff (S_bar - K)^+ for the Asian call. Functionally identical to
# Call.profile above; kept as a separate name for readability.
Asian.profile <- function(S, K){
  payoff <- S - K
  max(payoff, 0)
}

# Still assuming that mu = r (risk-neutral dynamics for pricing).
# MC estimate of the Asian call price: M = 10000 paths, n = 252 steps, h = T1/n.
Asian.price <- MC(M = 10000, n = 252, r = 0.02, s0 = 20, mu = 0.02, h = 1/252, 
   sigma = 0.24, T1 = 1, Asian.profile, Asian.set.S, K = 20)

As we can see from the result above, the price estimated via the MC estimator is 1.17. THIS PRICE CAN PERHAPS BE CHECKED WITH A CALCULATOR ONLINE!

A Monte Carlo estimator is implemented for a Lookback option with payoff profile \((S_{max}-K)^+\), where \(S_{max}\) refers to the maximum price during the time to maturity of the option. We use the parameter set \((s_0, T, r, \sigma, K) = (22, 2, 0.02, 0.29, 20)\). Note: the strike \(K\) is not specified in the problem description; \(K = 20\) is assumed here.

# Parameter set for the lookback option: (s0, T, r, sigma, K) = (22, 2, 0.02, 0.29, 20).
set.seed(061999)
s0 <- 22      # initial price
T1 <- 2       # maturity
r <- 0.02     # risk-free rate
sigma <- 0.29 # volatility
K <- 20       # strike (assumed; not given in the problem description)
n <- 1000     # time steps
# Path counts for the CI comparison below.
M1 <- 1000
M2 <- 10000
M3 <- 100000

# Path summary for the lookback option: the maximum price S_max attained
# over the lifetime of the option, fed into the MC estimator via set.S.
lookback.set.S <- function(S.values){
  S.values[which.max(S.values)]
}

# Payoff (S_max - K)^+ for the lookback option. Functionally identical to
# Call.profile above; kept as a separate name for readability.
lookback.profile <- function(S, K){
  max(S - K, 0)
}

# Still assuming that mu = r.
# MC estimate of the lookback price. Two bugs fixed relative to the original
# call: r = s0 (= 22) was passed as the discount rate, making exp(-r*T1)
# essentially 0 and forcing a price of 0; and h = 1/M2 used the path count
# instead of the step size. Correct arguments: r = r and h = T1/n.
lookback.price <- MC(M = M2, n = n, r = r, s0 = s0, mu = r, h = T1/n, 
   sigma = sigma, T1 = T1, lookback.profile, lookback.set.S, K = K)

The reported price of 0 cannot be correct: with \(s_0 = 22\) and \(K = 20\), the payoff \((S_{max}-K)^+\) is at least 2 on every path (since \(S_{max} \ge s_0\)), so the discounted price must be strictly positive. A price of 0 indicates an error in the arguments passed to the MC estimator (check the discount rate \(r\) and the step size \(h\)). The result should also be verified against an online pricing calculator.

# We calculate CIs of this option price for the different values of M.